*PRZETWARZANIE OBRAZU*
In [2]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.filters import frangi
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score
import pandas as pd
import matplotlib.patches as mpatches
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
def load_ppm_gz(path_to_gz):
with gzip.open(path_to_gz, 'rb') as f:
buf = f.read()
arr = np.frombuffer(buf, dtype=np.uint8)
return cv2.imdecode(arr, cv2.IMREAD_UNCHANGED)
def apply_preprocessing(img_rgb):
gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
return clahe.apply(gray)
def create_field_of_view_mask(gray):
_, th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
close = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kern)
open_ = cv2.morphologyEx(close, cv2.MORPH_OPEN, kern)
return (open_ > 0).astype(np.uint8)
def postprocess_mask(bin_mask):
kern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
close = cv2.morphologyEx(bin_mask, cv2.MORPH_CLOSE, kern)
open_ = cv2.morphologyEx(close, cv2.MORPH_OPEN, kern)
return open_
def get_metrics(gt, pred):
y_true = gt.flatten()
y_pred = pred.flatten()
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
acc = accuracy_score(y_true, y_pred)
sens = recall_score(y_true, y_pred)
spec = tn / (tn + fp) if (tn+fp)>0 else 0
return acc, sens, spec, (sens+spec)/2, (tn, fp, fn, tp)
image_files = sorted(f for f in os.listdir(images_dir) if f.endswith(".ppm.gz"))
all_results = []
total_cm = np.array([0,0,0,0], dtype=int)
for file in image_files:
name = file.replace(".ppm.gz","")
img = load_ppm_gz(os.path.join(images_dir, file))
mask_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz"))
vk_path = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
has_vk = os.path.exists(vk_path)
mask_ah_bin = (mask_ah>0).astype(np.uint8)
if has_vk:
mask_vk = load_ppm_gz(vk_path)
mask_vk_bin = (mask_vk>0).astype(np.uint8)
gt_mask = ((mask_ah_bin+mask_vk_bin)>=1).astype(np.uint8)
else:
gt_mask = mask_ah_bin
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(img_rgb)
fov = create_field_of_view_mask(gray)
vessel_prob = frangi(gray.astype(np.float64)/255.0)
vessel_bin_raw = (vessel_prob>0.03).astype(np.uint8)
vessel_bin_masked = postprocess_mask(vessel_bin_raw * fov)
acc, sens, spec, bal, cm = get_metrics(gt_mask, vessel_bin_masked)
total_cm += np.array(cm)
all_results.append({
"file": file,
"name": name,
"accuracy": acc,
"sensitivity": sens,
"specificity": spec,
"balanced_score": bal
})
top3 = sorted(all_results, key=lambda x: x["balanced_score"], reverse=True)[:3]
bad2 = sorted(all_results, key=lambda x: x["balanced_score"], reverse=True)[-2:]
for res in top3:
file = res["file"]
name = res["name"]
print(f"\n>> {name} balanced_score = {res['balanced_score']:.3f}")
img = load_ppm_gz(os.path.join(images_dir, file))
mask_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz"))
vk_path = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
mask_ah_bin = (mask_ah>0).astype(np.uint8)
if os.path.exists(vk_path):
mask_vk = load_ppm_gz(vk_path)
mask_vk_bin = (mask_vk>0).astype(np.uint8)
gt_mask = ((mask_ah_bin+mask_vk_bin)>=1).astype(np.uint8)
else:
gt_mask = mask_ah_bin
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(img_rgb)
fov = create_field_of_view_mask(gray)
vessel_prob = frangi(gray.astype(np.float64)/255.0)
vessel_bin_raw = (vessel_prob>0.03).astype(np.uint8)
vessel_bin_masked = postprocess_mask(vessel_bin_raw * fov)
acc, sens, spec, bal, (tn,fp,fn,tp) = get_metrics(gt_mask, vessel_bin_masked)
tn_mask = (gt_mask==0) & (vessel_bin_masked==0)
fp_mask = (gt_mask==0) & (vessel_bin_masked==1)
fn_mask = (gt_mask==1) & (vessel_bin_masked==0)
tp_mask = (gt_mask==1) & (vessel_bin_masked==1)
overlay = img_rgb.copy()
overlay[fp_mask] = [255,255,0]
overlay[fn_mask] = [0,0,255]
overlay[tp_mask] = [0,255,0]
magenta_patch = mpatches.Patch(color='yellow', label='FP')
blue_patch = mpatches.Patch(color='blue', label='FN')
green_patch = mpatches.Patch(color='green', label='TP')
fig, axs = plt.subplots(1,4,figsize=(20,5))
axs[0].imshow(img_rgb); axs[0].set_title('oryginał'); axs[0].axis('off')
axs[1].imshow(gt_mask, cmap='gray'); axs[1].set_title('maska ekspercka'); axs[1].axis('off')
axs[2].imshow(overlay); axs[2].set_title('wynik: overlay TP/FP/FN'); axs[2].axis('off')
axs[2].legend(handles=[green_patch, magenta_patch, blue_patch], loc='lower right')
axs[3].imshow(vessel_bin_masked, cmap='gray'); axs[3].set_title('wynik: maska seg.'); axs[3].axis('off')
plt.suptitle(f"TN:{tn} FP:{fp} FN:{fn} TP:{tp} | acc:{acc:.3f} sens:{sens:.3f} spec:{spec:.3f} bal:{bal:.3f}", fontsize=12, y=1.02)
plt.tight_layout()
plt.show()
for res in bad2:
file = res["file"]
name = res["name"]
print(f"\n>> {name} balanced_score = {res['balanced_score']:.3f}")
img = load_ppm_gz(os.path.join(images_dir, file))
mask_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz"))
vk_path = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
mask_ah_bin = (mask_ah>0).astype(np.uint8)
if os.path.exists(vk_path):
mask_vk = load_ppm_gz(vk_path)
mask_vk_bin = (mask_vk>0).astype(np.uint8)
gt_mask = ((mask_ah_bin+mask_vk_bin)>=1).astype(np.uint8)
else:
gt_mask = mask_ah_bin
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(img_rgb)
fov = create_field_of_view_mask(gray)
vessel_prob = frangi(gray.astype(np.float64)/255.0)
vessel_bin_raw = (vessel_prob>0.03).astype(np.uint8)
vessel_bin_masked = postprocess_mask(vessel_bin_raw * fov)
acc, sens, spec, bal, (tn,fp,fn,tp) = get_metrics(gt_mask, vessel_bin_masked)
tn_mask = (gt_mask==0) & (vessel_bin_masked==0)
fp_mask = (gt_mask==0) & (vessel_bin_masked==1)
fn_mask = (gt_mask==1) & (vessel_bin_masked==0)
tp_mask = (gt_mask==1) & (vessel_bin_masked==1)
overlay = img_rgb.copy()
overlay[fp_mask] = [255,255,0]
overlay[fn_mask] = [0,0,255]
overlay[tp_mask] = [0,255,0]
yellow_patch = mpatches.Patch(color='yellow', label='FP')
blue_patch = mpatches.Patch(color='blue', label='FN')
green_patch = mpatches.Patch(color='green', label='TP')
fig, axs = plt.subplots(1,4,figsize=(20,5))
axs[0].imshow(img_rgb); axs[0].set_title('oryginał'); axs[0].axis('off')
axs[1].imshow(gt_mask, cmap='gray'); axs[1].set_title('maska ekspercka'); axs[1].axis('off')
axs[2].imshow(overlay); axs[2].set_title('overlay TP/FP/FN'); axs[2].axis('off')
axs[2].legend(handles=[green_patch, yellow_patch, blue_patch], loc='lower right')
axs[3].imshow(vessel_bin_masked, cmap='gray'); axs[3].set_title('maska seg.'); axs[3].axis('off')
plt.suptitle(f"TN:{tn} FP:{fp} FN:{fn} TP:{tp} | Acc:{acc:.3f} Sens:{sens:.3f} Spec:{spec:.3f} Bal:{bal:.3f}", fontsize=12, y=1.02)
plt.tight_layout()
plt.show()
df = pd.DataFrame(all_results)
print("\n ZBIORCZE METRYKI DLA WSZYSTKICH OBRAZÓW")
print(df.describe().loc[['mean','std','min','max'], ['accuracy','sensitivity','specificity','balanced_score']])
tn, fp, fn, tp = total_cm
accuracy_total = (tp+tn)/(tp+tn+fp+fn)
sensitivity_total = tp/(tp+fn) if tp+fn>0 else 0
specificity_total = tn/(tn+fp) if tn+fp>0 else 0
balanced_total = (sensitivity_total+specificity_total)/2
print(f"\nŁączna macierz pomyłek: [[TN:{tn}, FP:{fp}] [FN:{fn}, TP:{tp}]]")
print(f"Zbiorcze metryki:\n Accuracy: {accuracy_total:.4f}\n Sensitivity: {sensitivity_total:.4f}\n Specificity: {specificity_total:.4f}\n Balanced: {balanced_total:.4f}")
>> im0163 balanced_score = 0.736
>> im0255 balanced_score = 0.700
>> im0236 balanced_score = 0.695
>> im0004 balanced_score = 0.520
>> im0324 balanced_score = 0.519
ZBIORCZE METRYKI DLA WSZYSTKICH OBRAZÓW
accuracy sensitivity specificity balanced_score
mean 0.910214 0.233222 0.997268 0.615245
std 0.018406 0.124003 0.004610 0.061634
min 0.872357 0.037384 0.978887 0.518585
max 0.941216 0.473566 0.999884 0.736288
Łączna macierz pomyłek: [[TN:7464124, FP:20508] [FN:739983, TP:245385]]
Zbiorcze metryki:
Accuracy: 0.9102
Sensitivity: 0.2490
Specificity: 0.9973
Balanced: 0.6231
*K-NN*
In [ ]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import moments_central, moments_hu
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import (
confusion_matrix, accuracy_score, recall_score,
roc_curve
)
from sklearn.model_selection import train_test_split
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
files = sorted(f for f in os.listdir(images_dir) if f.endswith(".ppm.gz"))
train_f = files[:10]
test_f = files[10:]
def create_fov(gray):
_, t = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
m = cv2.morphologyEx(t, cv2.MORPH_CLOSE, k)
m = cv2.morphologyEx(m, cv2.MORPH_OPEN, k)
return (m > 0).astype(np.uint8)
def get_metrics(gt, pred):
y_t, y_p = gt.flatten(), pred.flatten()
tn, fp, fn, tp = confusion_matrix(y_t, y_p).ravel()
acc = accuracy_score(y_t, y_p)
sens = recall_score(y_t, y_p, zero_division=0)
spec = tn / (tn + fp) if (tn + fp) > 0 else 0
return acc, sens, spec, (tn, fp, fn, tp)
def extract_features_from_patch(patch):
p = patch.astype(np.float64)
v = np.var(p)
mean_intensity = np.mean(p)
gx = cv2.Sobel(p, cv2.CV_64F, 1, 0, ksize=3)
gy = cv2.Sobel(p, cv2.CV_64F, 0, 1, ksize=3)
grad_mean = np.mean(np.sqrt(gx**2 + gy**2))
if v == 0.0:
hu = [0.0]*7
else:
mc = moments_central(p)
hu = moments_hu(mc)
hu = [0.0 if np.isnan(x) or np.isinf(x) else x for x in hu]
return [v, mean_intensity, grad_mean] + hu
def extract_data(gray, mask, patch_size=5, stride=1):
X, y, coords = [], [], []
off = patch_size // 2
h, w = gray.shape
for i in range(off, h-off, stride):
for j in range(off, w-off, stride):
if mask[i, j] not in (0, 1):
continue
patch = gray[i-off:i+off+1, j-off:j+off+1]
feats = extract_features_from_patch(patch)
X.append(feats)
y.append(mask[i, j])
coords.append((i, j))
return np.array(X), np.array(y), coords
# zbior trening
Xtr, ytr = [], []
for fn in train_f:
name = fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, fn))
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
Xf, yf, _ = extract_data(gray, mask, patch_size=5, stride=1)
Xtr.extend(Xf)
ytr.extend(yf)
Xtr = np.nan_to_num(np.array(Xtr))
ytr = np.array(ytr)
# balansowanie klas
pos_idx = np.where(ytr == 1)[0]
neg_idx = np.where(ytr == 0)[0]
neg_sel = np.random.choice(neg_idx, size=len(pos_idx), replace=False)
sel = np.concatenate([pos_idx, neg_sel])
Xbal, ybal = Xtr[sel], ytr[sel]
#train/val i trenowanie k-NN
X_train, X_val, y_train, y_val = train_test_split(
Xbal, ybal, test_size=0.3, random_state=42, stratify=ybal
)
clf = make_pipeline(
StandardScaler(),
KNeighborsClassifier(n_neighbors=1, weights='distance')
)
clf.fit(X_train, y_train)
probs_val = clf.predict_proba(X_val)[:,1]
fpr, tpr, thr = roc_curve(y_val, probs_val)
best_idx = np.argmax(tpr - fpr)
best_thr = thr[best_idx]
print(f"Wybrany próg z ROC (Youden’s J): {best_thr:.3f}")
test_fn = test_f[0]
name = test_fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, test_fn))
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
Xte, yte, coords = extract_data(gray, mask, patch_size=5, stride=1)
Xte = np.nan_to_num(Xte)
probs_test = clf.predict_proba(Xte)[:,1]
ypred = (probs_test >= best_thr).astype(np.uint8)
pred_mask = np.zeros_like(mask)
for (i, j), v in zip(coords, ypred):
pred_mask[i, j] = v
pred_mask = postprocess_mask(pred_mask)
tn_mask = (mask == 0) & (pred_mask == 0) & (fov == 1)
fp_mask = (mask == 0) & (pred_mask == 1) & (fov == 1)
fn_mask = (mask == 1) & (pred_mask == 0) & (fov == 1)
tp_mask = (mask == 1) & (pred_mask == 1) & (fov == 1)
overlay = rgb.copy()
overlay[tp_mask] = [0, 255, 0]
overlay[fp_mask] = [255, 255, 0]
overlay[fn_mask] = [0, 0, 255]
acc, sens, spec, (tn, fp, fn, tp) = get_metrics(mask, pred_mask)
print(f"\n{name}")
print(f"Accuracy: {acc:.4f}")
print(f"Sensitivity: {sens:.4f}")
print(f"Specificity: {spec:.4f}")
print(f"Confusion matrix:\n[[TN:{tn}, FP:{fp}]\n [FN:{fn}, TP:{tp}]]")
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].imshow(rgb); axs[0].set_title("Oryginał"); axs[0].axis('off')
axs[1].imshow(mask, cmap='gray'); axs[1].set_title("Maska ekspercka"); axs[1].axis('off')
axs[2].imshow(overlay); axs[2].set_title("Predykcja k-NN"); axs[2].axis('off')
plt.suptitle(f"{name} Acc:{acc:.3f}, Sens:{sens:.3f}, Spec:{spec:.3f}")
green = mpatches.Patch(color='green', label='TP')
yellow = mpatches.Patch(color='yellow', label='FP')
blue = mpatches.Patch(color='blue', label='FN')
axs[2].legend(handles=[green, yellow, blue], loc='lower right')
plt.tight_layout()
plt.show()
Wybrany próg z ROC (Youden’s J): 1.000 im0162 Accuracy: 0.8049 Sensitivity: 0.9140 Specificity: 0.7905 Confusion matrix: [[TN:295564, FP:78350] [FN:4264, TP:45322]]
*RANDOM FOREST*
In [ ]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.measure import moments_central, moments_hu
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
confusion_matrix, accuracy_score, recall_score,
roc_curve
)
from sklearn.model_selection import train_test_split
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
files = sorted(f for f in os.listdir(images_dir) if f.endswith(".ppm.gz"))
train_f = files[:10]
test_f = files[10:]
def create_fov(gray):
_, t = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
m = cv2.morphologyEx(t, cv2.MORPH_CLOSE, k)
m = cv2.morphologyEx(m, cv2.MORPH_OPEN, k)
return (m > 0).astype(np.uint8)
def get_metrics(gt, pred):
y_t, y_p = gt.flatten(), pred.flatten()
tn, fp, fn, tp = confusion_matrix(y_t, y_p).ravel()
acc = accuracy_score(y_t, y_p)
sens = recall_score(y_t, y_p, zero_division=0)
spec = tn / (tn + fp) if (tn + fp) > 0 else 0
return acc, sens, spec, (tn, fp, fn, tp)
def postprocess_mask(mask_bin):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
opened = cv2.morphologyEx(mask_bin, cv2.MORPH_OPEN, kernel)
closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)
return closed
def extract_features_from_patch(patch):
p = patch.astype(np.float64)
v = np.var(p)
mean_intensity = np.mean(p)
gx = cv2.Sobel(p, cv2.CV_64F, 1, 0, ksize=3)
gy = cv2.Sobel(p, cv2.CV_64F, 0, 1, ksize=3)
grad_mean = np.mean(np.sqrt(gx**2 + gy**2))
if v == 0.0:
hu = [0.0]*7
else:
mc = moments_central(p)
hu = moments_hu(mc)
hu = [0.0 if np.isnan(x) or np.isinf(x) else x for x in hu]
return [v, mean_intensity, grad_mean] + hu
def extract_data(gray, mask, patch_size=5, stride=1):
X, y, coords = [], [], []
off = patch_size // 2
h, w = gray.shape
for i in range(off, h-off, stride):
for j in range(off, w-off, stride):
if mask[i, j] not in (0, 1):
continue
patch = gray[i-off:i+off+1, j-off:j+off+1]
feats = extract_features_from_patch(patch)
X.append(feats)
y.append(mask[i, j])
coords.append((i, j))
return np.array(X), np.array(y), coords
Xtr, ytr = [], []
for fn in train_f:
name = fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, fn))
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
Xf, yf, _ = extract_data(gray, mask, patch_size=5, stride=1)
Xtr.extend(Xf)
ytr.extend(yf)
Xtr = np.nan_to_num(np.array(Xtr))
ytr = np.array(ytr)
pos_idx = np.where(ytr == 1)[0]
neg_idx = np.where(ytr == 0)[0]
neg_sel = np.random.choice(neg_idx, size=len(pos_idx), replace=False)
sel = np.concatenate([pos_idx, neg_sel])
Xbal, ybal = Xtr[sel], ytr[sel]
X_train, X_val, y_train, y_val = train_test_split(
Xbal, ybal, test_size=0.3, random_state=42, stratify=ybal
)
clf = make_pipeline(
StandardScaler(),
RandomForestClassifier(
n_estimators=100,
class_weight='balanced',
random_state=42,
n_jobs=-1
)
)
clf.fit(X_train, y_train)
probs_val = clf.predict_proba(X_val)[:,1]
fpr, tpr, thr = roc_curve(y_val, probs_val)
best_idx = np.argmax(tpr - fpr)
best_thr = thr[best_idx]
print(f"Wybrany próg z ROC (Youden’s J): {best_thr:.3f}")
test_fn = test_f[0]
name = test_fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, test_fn))
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
Xte, yte, coords = extract_data(gray, mask, patch_size=5, stride=1)
Xte = np.nan_to_num(Xte)
probs_test = clf.predict_proba(Xte)[:,1]
ypred = (probs_test >= best_thr).astype(np.uint8)
pred_mask = np.zeros_like(mask)
for (i, j), v in zip(coords, ypred):
pred_mask[i, j] = v
pred_mask = postprocess_mask(pred_mask)
tn_mask = (mask == 0) & (pred_mask == 0) & (fov == 1)
fp_mask = (mask == 0) & (pred_mask == 1) & (fov == 1)
fn_mask = (mask == 1) & (pred_mask == 0) & (fov == 1)
tp_mask = (mask == 1) & (pred_mask == 1) & (fov == 1)
overlay = rgb.copy()
overlay[tp_mask] = [0, 255, 0]
overlay[fp_mask] = [255, 255, 0]
overlay[fn_mask] = [0, 0, 255]
acc, sens, spec, (tn, fp, fn, tp) = get_metrics(mask, pred_mask)
print(f"\n{name}")
print(f"Accuracy: {acc:.4f}")
print(f"Sensitivity: {sens:.4f}")
print(f"Specificity: {spec:.4f}")
print(f"Confusion matrix:\n[[TN:{tn}, FP:{fp}]\n [FN:{fn}, TP:{tp}]]")
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].imshow(rgb); axs[0].set_title("Oryginał"); axs[0].axis('off')
axs[1].imshow(mask, cmap='gray'); axs[1].set_title("Maska ekspercka"); axs[1].axis('off')
axs[2].imshow(overlay); axs[2].set_title("Predykcja RF"); axs[2].axis('off')
plt.suptitle(f"{name} Acc:{acc:.3f}, Sens:{sens:.3f}, Spec:{spec:.3f}")
green = mpatches.Patch(color='green', label='TP')
yellow = mpatches.Patch(color='yellow', label='FP')
blue = mpatches.Patch(color='blue', label='FN')
axs[2].legend(handles=[green, yellow, blue], loc='lower right')
plt.tight_layout()
plt.show()
Wybrany próg z ROC (Youden’s J): 0.516 im0162 Accuracy: 0.9324 Sensitivity: 0.8275 Specificity: 0.9463 Confusion matrix: [[TN:353853, FP:20061] [FN:8556, TP:41030]]
*SIEĆ NA PATCHACH*
In [ ]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, roc_curve
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
def create_fov(gray):
_, t = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
m = cv2.morphologyEx(t, cv2.MORPH_CLOSE, k)
m = cv2.morphologyEx(m, cv2.MORPH_OPEN, k)
return (m > 0).astype(np.uint8)
def overlay_vessels(rgb, vessel_mask, fov_mask):
out = rgb.copy()
mask_overlay = (vessel_mask == 1) & (fov_mask == 1)
out[mask_overlay] = [0, 255, 0]
return out
def extract_patches(gray, mask, patch_size=5, stride=1):
offset = patch_size // 2
h, w = gray.shape
patches, labels, coords = [], [], []
for i in range(offset, h - offset, stride):
for j in range(offset, w - offset, stride):
if mask[i, j] not in (0, 1):
continue
patch = gray[i-offset:i+offset+1, j-offset:j+offset+1]
if patch.shape != (patch_size, patch_size):
continue
patches.append(patch)
labels.append(mask[i, j])
coords.append((i, j))
return np.array(patches), np.array(labels), coords
def build_patch_cnn(patch_size=5):
inp = Input(shape=(patch_size, patch_size, 1))
x = Conv2D(16, (3,3), activation='relu', padding='same')(inp)
x = MaxPooling2D((2,2))(x)
x = Conv2D(32, (3,3), activation='relu', padding='same')(x)
x = MaxPooling2D((2,2))(x)
x = Flatten()(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.3)(x)
out = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=out)
return model
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
files = sorted([f for f in os.listdir(images_dir) if f.endswith(".ppm.gz")])
train_f = files[:10]
test_f = files[10:]
patch_size = 5
stride = 1
X_patches, y_patches = [], []
for fn in train_f:
name = fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, fn))
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
patches, labels, _ = extract_patches(gray, mask, patch_size=patch_size, stride=stride)
X_patches.append(patches)
y_patches.append(labels)
X_patches = np.vstack(X_patches)
y_patches = np.concatenate(y_patches)
pos_idx = np.where(y_patches == 1)[0]
neg_idx = np.where(y_patches == 0)[0]
n_pos = len(pos_idx)
n_neg = len(neg_idx)
np.random.seed(42)
neg_sel = np.random.choice(neg_idx, size=n_pos, replace=False)
sel = np.concatenate([pos_idx, neg_sel])
X_sel = X_patches[sel]
y_sel = y_patches[sel]
X_sel = X_sel.astype(np.float32) / 255.0
X_sel = np.expand_dims(X_sel, axis=-1) # kanał: N, 5,5,1
y_sel = y_sel.astype(np.uint8)
X_train, X_val, y_train, y_val = train_test_split(
X_sel, y_sel, test_size=0.2, random_state=42, stratify=y_sel
)
print(" przed treningiem CNN ")
print("X_train:", X_train.shape, "y_train:", y_train.shape)
print("X_val: ", X_val.shape, "y_val: ", y_val.shape)
print("Liczba pikseli klasa 0 w train:", np.sum(y_train==0),
"klasa 1 w train:", np.sum(y_train==1))
print("Liczba pikseli klasa 0 w val: ", np.sum(y_val==0),
"klasa 1 w val: ", np.sum(y_val==1))
model = build_patch_cnn(patch_size=patch_size)
model.compile(
optimizer=Adam(1e-4),
loss='binary_crossentropy',
metrics=['accuracy']
)
model.summary()
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True),
ModelCheckpoint("best_patch_cnn.keras", save_best_only=True),
ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6, verbose=1)
]
history = model.fit(
X_train, y_train,
validation_data=(X_val, y_val),
epochs=20,
batch_size=128,
callbacks=callbacks,
verbose=2
)
probs_val = model.predict(X_val).flatten() # kształt (n_val,)
fpr, tpr, thresholds = roc_curve(y_val, probs_val)
j_scores = tpr - fpr
best_idx = np.argmax(j_scores)
best_thr = thresholds[best_idx]
print(f"Wybrany próg z ROC (Youden’s J): {best_thr:.4f}")
test_fn = test_f[0]
name = test_fn.replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, test_fn))
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_fov(gray)
gray *= fov
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p) > 0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask_gt = ((m_ah.astype(int) + m_vk.astype(int)) >= 1).astype(np.uint8)
patches_test, labels_test, coords = extract_patches(gray, mask_gt, patch_size=patch_size, stride=stride)
patches_test = patches_test.astype(np.float32) / 255.0
patches_test = np.expand_dims(patches_test, axis=-1)
probs_test = model.predict(patches_test).flatten()
y_pred_labels = (probs_test >= best_thr).astype(np.uint8)
pred_mask = np.zeros_like(mask_gt, dtype=np.uint8)
for (i, j), lab in zip(coords, y_pred_labels):
pred_mask[i, j] = lab
pred_mask = postprocess_mask(pred_mask)
tn_mask = (mask_gt == 0) & (pred_mask == 0) & (fov == 1)
fp_mask = (mask_gt == 0) & (pred_mask == 1) & (fov == 1)
fn_mask = (mask_gt == 1) & (pred_mask == 0) & (fov == 1)
tp_mask = (mask_gt == 1) & (pred_mask == 1) & (fov == 1)
overlay = rgb.copy()
overlay[tp_mask] = [0, 255, 0]
overlay[fp_mask] = [255, 255, 0]
overlay[fn_mask] = [0, 0, 255]
acc, sens, spec, (tn, fp, fn, tp) = get_metrics(mask, pred_mask)
print(f"\n{name}")
print(f"Accuracy: {acc:.4f}")
print(f"Sensitivity: {sens:.4f}")
print(f"Specificity: {spec:.4f}")
print(f"Confusion matrix:\n[[TN:{tn}, FP:{fp}]\n [FN:{fn}, TP:{tp}]]")
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].imshow(rgb); axs[0].set_title("Oryginał"); axs[0].axis('off')
axs[1].imshow(mask_gt, cmap='gray'); axs[1].set_title("Maska ekspercka"); axs[1].axis('off')
axs[2].imshow(overlay); axs[2].set_title("Predykcja CNN po patchach"); axs[2].axis('off')
plt.suptitle(f"{name} Acc:{acc:.3f}, Sens:{sens:.3f}, Spec:{spec:.3f}")
green = mpatches.Patch(color='green', label='TP')
yellow = mpatches.Patch(color='yellow', label='FP')
blue = mpatches.Patch(color='blue', label='FN')
axs[2].legend(handles=[green, yellow, blue], loc='lower right')
plt.tight_layout()
plt.show()
przed treningiem CNN X_train: (776987, 5, 5, 1) y_train: (776987,) X_val: (194247, 5, 5, 1) y_val: (194247,) Liczba pikseli klasa 0 w train: 388493 klasa 1 w train: 388494 Liczba pikseli klasa 0 w val: 97124 klasa 1 w val: 97123
Model: "functional_9"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ input_layer_9 (InputLayer) │ (None, 5, 5, 1) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_81 (Conv2D) │ (None, 5, 5, 16) │ 160 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_18 (MaxPooling2D) │ (None, 2, 2, 16) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_82 (Conv2D) │ (None, 2, 2, 32) │ 4,640 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_19 (MaxPooling2D) │ (None, 1, 1, 32) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten_2 (Flatten) │ (None, 32) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_4 (Dense) │ (None, 64) │ 2,112 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_16 (Dropout) │ (None, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_5 (Dense) │ (None, 1) │ 65 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 6,977 (27.25 KB)
Trainable params: 6,977 (27.25 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/20 6071/6071 - 29s - 5ms/step - accuracy: 0.7425 - loss: 0.5179 - val_accuracy: 0.8015 - val_loss: 0.4388 - learning_rate: 1.0000e-04 Epoch 2/20 6071/6071 - 26s - 4ms/step - accuracy: 0.8159 - loss: 0.4085 - val_accuracy: 0.8263 - val_loss: 0.3870 - learning_rate: 1.0000e-04 Epoch 3/20 6071/6071 - 25s - 4ms/step - accuracy: 0.8268 - loss: 0.3883 - val_accuracy: 0.8300 - val_loss: 0.3798 - learning_rate: 1.0000e-04 Epoch 4/20 6071/6071 - 24s - 4ms/step - accuracy: 0.8304 - loss: 0.3832 - val_accuracy: 0.8318 - val_loss: 0.3780 - learning_rate: 1.0000e-04 Epoch 5/20 6071/6071 - 27s - 4ms/step - accuracy: 0.8321 - loss: 0.3805 - val_accuracy: 0.8345 - val_loss: 0.3734 - learning_rate: 1.0000e-04 Epoch 6/20 6071/6071 - 27s - 5ms/step - accuracy: 0.8335 - loss: 0.3778 - val_accuracy: 0.8348 - val_loss: 0.3721 - learning_rate: 1.0000e-04 Epoch 7/20 6071/6071 - 29s - 5ms/step - accuracy: 0.8344 - loss: 0.3762 - val_accuracy: 0.8325 - val_loss: 0.3771 - learning_rate: 1.0000e-04 Epoch 8/20 6071/6071 - 27s - 5ms/step - accuracy: 0.8352 - loss: 0.3749 - val_accuracy: 0.8344 - val_loss: 0.3735 - learning_rate: 1.0000e-04 Epoch 9/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8357 - loss: 0.3738 - val_accuracy: 0.8376 - val_loss: 0.3684 - learning_rate: 1.0000e-04 Epoch 10/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8364 - loss: 0.3726 - val_accuracy: 0.8364 - val_loss: 0.3712 - learning_rate: 1.0000e-04 Epoch 11/20 6071/6071 - 27s - 4ms/step - accuracy: 0.8370 - loss: 0.3718 - val_accuracy: 0.8386 - val_loss: 0.3664 - learning_rate: 1.0000e-04 Epoch 12/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8374 - loss: 0.3708 - val_accuracy: 0.8378 - val_loss: 0.3669 - learning_rate: 1.0000e-04 Epoch 13/20 6071/6071 - 29s - 5ms/step - accuracy: 0.8378 - loss: 0.3700 - val_accuracy: 0.8383 - val_loss: 0.3684 - learning_rate: 1.0000e-04 Epoch 14/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8382 - loss: 0.3693 - val_accuracy: 0.8397 - val_loss: 0.3646 - learning_rate: 1.0000e-04 Epoch 15/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8388 - loss: 0.3687 - val_accuracy: 0.8390 - val_loss: 0.3664 - learning_rate: 1.0000e-04 Epoch 16/20 6071/6071 - 28s - 5ms/step - accuracy: 0.8388 - loss: 0.3681 - val_accuracy: 0.8407 - val_loss: 0.3636 - learning_rate: 1.0000e-04 Epoch 17/20 6071/6071 - 27s - 4ms/step - accuracy: 0.8395 - loss: 0.3671 - val_accuracy: 0.8381 - val_loss: 0.3672 - learning_rate: 1.0000e-04 Epoch 18/20 6071/6071 - 29s - 5ms/step - accuracy: 0.8396 - loss: 0.3668 - val_accuracy: 0.8414 - val_loss: 0.3618 - learning_rate: 1.0000e-04 Epoch 19/20 6071/6071 - 27s - 4ms/step - accuracy: 0.8401 - loss: 0.3660 - val_accuracy: 0.8392 - val_loss: 0.3650 - learning_rate: 1.0000e-04 Epoch 20/20 6071/6071 - 29s - 5ms/step - accuracy: 0.8404 - loss: 0.3657 - val_accuracy: 0.8410 - val_loss: 0.3618 - learning_rate: 1.0000e-04 6071/6071 ━━━━━━━━━━━━━━━━━━━━ 6s 973us/step Wybrany próg z ROC (Youden’s J): 0.5235 13072/13072 ━━━━━━━━━━━━━━━━━━━━ 12s 944us/step im0162 Accuracy: 0.7838 Sensitivity: 0.1477 Specificity: 0.8998 Confusion matrix: [[TN:322300, FP:35903] [FN:55653, TP:9644]]
*U-NET* DROPOUT 0.3 -> 0.2 POS_WEIGHT 6
In [ ]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, roc_curve
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, Dropout
)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
pos_weight = 6
def weighted_bce(y_true, y_pred):
bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)
y2 = tf.squeeze(y_true, axis=-1)
weight = 1 + (pos_weight - 1) * y2
return tf.reduce_mean(bce * weight)
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
def create_field_of_view_mask(gray):
_, thresh = cv2.threshold(
gray, 0, 255,
cv2.THRESH_OTSU | cv2.THRESH_BINARY
)
ker = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, ker)
cnts, _ = cv2.findContours(
closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
if not cnts:
return (closed > 0).astype(np.uint8)
cnt = max(cnts, key=cv2.contourArea)
mask = np.zeros_like(gray, dtype=np.uint8)
cv2.drawContours(mask, [cnt], -1, 255, thickness=-1)
return (mask > 0).astype(np.uint8)
IMG_SIZE = 256
files = sorted(glob(os.path.join(images_dir, "*.ppm.gz")))
def postprocess_mask(mask_bin):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
opened = cv2.morphologyEx(mask_bin, cv2.MORPH_OPEN, kernel)
closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)
return closed
X, Y = [], []
X_color_orig = []
for fn in files:
name = os.path.basename(fn).replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, f"{name}.ppm.gz"))
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_field_of_view_mask(gray)
gray = gray * fov
orig_color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p)>0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int)+m_vk.astype(int))>=1).astype(np.uint8)
mask = mask * fov
gray_r = cv2.resize(gray, (IMG_SIZE,IMG_SIZE), interpolation=cv2.INTER_AREA)
mask_r = cv2.resize(mask, (IMG_SIZE,IMG_SIZE), interpolation=cv2.INTER_NEAREST)
gray_r = gray_r.astype(np.float32)/255.0
mask_r = mask_r.astype(np.float32)
X.append(np.expand_dims(gray_r, axis=-1))
Y.append(np.expand_dims(mask_r, axis=-1))
X_color_orig.append(orig_color)
X = np.stack(X, axis=0) # (N, H, W, 1)
Y = np.stack(Y, axis=0) # (N, H, W, 1)
X_tmp, X_test, Y_tmp, Y_test, Xc_tmp, Xc_test, files_tmp, files_test = train_test_split(
X, Y, X_color_orig, files, test_size=0.2, random_state=42
)
X_train, X_val, Y_train, Y_val, Xc_train, Xc_val, files_train, files_val = train_test_split(
X_tmp, Y_tmp, Xc_tmp, files_tmp, test_size=0.2, random_state=42
)
print("Train:", X_train.shape, Y_train.shape)
print("Val: ", X_val.shape, Y_val.shape)
print("Test: ", X_test.shape, Y_test.shape)
def unet(input_shape=(IMG_SIZE, IMG_SIZE, 1)):
inp = Input(input_shape)
c1 = Conv2D(64, 3, activation="relu", padding="same")(inp)
c1 = Conv2D(64, 3, activation="relu", padding="same")(c1)
p1 = MaxPooling2D()(c1)
c2 = Conv2D(128, 3, activation="relu", padding="same")(p1)
c2 = Conv2D(128, 3, activation="relu", padding="same")(c2)
p2 = MaxPooling2D()(c2)
c3 = Conv2D(256, 3, activation="relu", padding="same")(p2)
c3 = Conv2D(256, 3, activation="relu", padding="same")(c3)
c3 = Dropout(0.3)(c3)
u4 = Conv2DTranspose(128, 2, strides=2, padding="same")(c3)
m4 = concatenate([u4, c2])
c4 = Conv2D(128, 3, activation="relu", padding="same")(m4)
c4 = Conv2D(128, 3, activation="relu", padding="same")(c4)
c4 = Dropout(0.2)(c4)
u5 = Conv2DTranspose(64, 2, strides=2, padding="same")(c4)
m5 = concatenate([u5, c1])
c5 = Conv2D(64, 3, activation="relu", padding="same")(m5)
c5 = Conv2D(64, 3, activation="relu", padding="same")(c5)
out = Conv2D(1, 1, activation="sigmoid")(c5)
return Model(inp, out)
model = unet(input_shape=(IMG_SIZE,IMG_SIZE,1))
model.compile(
optimizer="adam",
loss=weighted_bce,
metrics=["accuracy"]
)
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True),
ModelCheckpoint("best_unet.keras", save_best_only=True),
ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3, min_lr=1e-6)
]
history = model.fit(
X_train, Y_train,
validation_data=(X_val, Y_val),
epochs=20,
batch_size=8,
callbacks=callbacks,
verbose=2
)
probs_val_flat = model.predict(X_val).flatten()
y_val_flat = Y_val.flatten()
fpr, tpr, th = roc_curve(y_val_flat, probs_val_flat)
best_idx = np.argmax(tpr - fpr)
best_thr = th[best_idx]
probs_test_flat = model.predict(X_test).flatten()
y_test_flat = Y_test.flatten()
print("delta\tSpec\tSens")
for delta in [0.00, 0.05, 0.10, 0.15]:
thr = best_thr + delta
y_pred = (probs_test_flat >= thr).astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_test_flat, y_pred).ravel()
spec = tn / (tn + fp) if (tn+fp)>0 else 0
sens = tp / (tp + fn) if (tp+fn)>0 else 0
print(f"{delta:.2f}\t{spec:.3f}\t{sens:.3f}")
delta = 0.05
thr = best_thr + delta
y_pred_final = (probs_test_flat >= thr).astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_test_flat, y_pred_final).ravel()
acc = accuracy_score(y_test_flat, y_pred_final)
sens = recall_score(y_test_flat, y_pred_final)
spec = tn / (tn + fp)
print(f"\nFinal: Acc:{acc:.3f}, Sens:{sens:.3f}, Spec:{spec:.3f}")
print(f"Confusion matrix:\n[[TN:{tn}, FP:{fp}]\n [FN:{fn}, TP:{tp}]]")
pred_masks256 = y_pred_final.reshape(-1, IMG_SIZE, IMG_SIZE)
print("\nMacierz pomyłek dla każdego obrazu testowego")
print("-" * 60)
print(f"{'Name':<20} | {'TN':>8} {'FP':>8} {'FN':>8} {'TP':>8}")
print("-" * 60)
for i in range(len(Xc_test)):
name = os.path.basename(files_test[i]).replace(".ppm.gz", "")
gt_mask_256 = Y_test[i].squeeze()
pred_mask_256 = pred_masks256[i]
y_true_flat = gt_mask_256.flatten().astype(np.uint8)
y_pred_flat = pred_mask_256.flatten().astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_true_flat, y_pred_flat, labels=[0, 1]).ravel()
print(f"{name:<20} | {tn:>8} {fp:>8} {fn:>8} {tp:>8}")
print("-" * 60)
indices = [0, 1, 3]
fig, axes = plt.subplots(len(indices), 3, figsize=(18, 6*len(indices)))
if len(indices) == 1:
axes = np.array([axes])
for row, idx in enumerate(indices):
if idx >= len(Xc_test):
print(f"Indeks {idx} jest poza zakresem zbioru testowego.")
continue
orig_full = Xc_test[idx]
correct_filepath = files_test[idx]
name = os.path.basename(correct_filepath).replace(".ppm.gz","")
ah_path = os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")
vk_path = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
if not os.path.exists(ah_path): continue
ah_img = load_ppm_gz(ah_path)
if ah_img is None: continue
ah = ah_img > 0
vk_img = load_ppm_gz(vk_path)
vk = (vk_img > 0) if vk_img is not None else np.zeros_like(ah)
gt_full = ((ah.astype(int) + vk.astype(int)) >= 1).astype(np.uint8)
pm256 = pred_masks256[idx].astype(np.uint8)
pm_up = cv2.resize(pm256, (orig_full.shape[1], orig_full.shape[0]), interpolation=cv2.INTER_NEAREST)
y_t = gt_full.flatten()
y_p = pm_up.flatten()
tn, fp, fn, tp = confusion_matrix(y_t, y_p, labels=[0, 1]).ravel()
acc = (tn+tp)/(tn+tp+fn+fp) if (tn+tp+fn+fp) > 0 else 0
sens = tp/(tp+fn) if tp+fn > 0 else 0
spec = tn/(tn+fp) if tn+fp > 0 else 0
overlay = orig_full.copy()
overlay[(gt_full==1)&(pm_up==1)] = [0,255,0]
overlay[(gt_full==0)&(pm_up==1)] = [255, 105, 180]
overlay[(gt_full==1)&(pm_up==0)] = [0,0,255]
ax0, ax1, ax2 = axes[row]
ax0.imshow(orig_full)
ax0.set_title(f"Oryginał (name: {name})")
ax0.axis("off")
ax1.imshow(gt_full, cmap="gray")
ax1.set_title("GT mask")
ax1.axis("off")
ax2.imshow(overlay)
ax2.set_title(f"Predykcja\nAcc:{acc:.2f} Sens:{sens:.2f} Spec:{spec:.2f}")
green_patch = mpatches.Patch(color='green', label='TP')
pink_patch = mpatches.Patch(color='pink', label='FP')
blue_patch = mpatches.Patch(color='blue', label='FN')
ax2.legend(handles=[green_patch, yellow_patch, blue_patch], loc='lower right')
ax2.axis("off")
plt.tight_layout()
plt.show()
Train: (12, 256, 256, 1) (12, 256, 256, 1) Val: (4, 256, 256, 1) (4, 256, 256, 1) Test: (4, 256, 256, 1) (4, 256, 256, 1) Epoch 1/20 2/2 - 13s - 7s/step - accuracy: 0.6154 - loss: 1.0966 - val_accuracy: 0.3970 - val_loss: 1.1260 - learning_rate: 0.0010 Epoch 2/20 2/2 - 11s - 5s/step - accuracy: 0.5588 - loss: 1.0905 - val_accuracy: 0.4104 - val_loss: 1.1144 - learning_rate: 0.0010 Epoch 3/20 2/2 - 20s - 10s/step - accuracy: 0.4181 - loss: 1.0753 - val_accuracy: 0.4325 - val_loss: 1.0744 - learning_rate: 0.0010 Epoch 4/20 2/2 - 10s - 5s/step - accuracy: 0.4345 - loss: 1.0250 - val_accuracy: 0.8531 - val_loss: 1.0907 - learning_rate: 0.0010 Epoch 5/20 2/2 - 11s - 5s/step - accuracy: 0.8158 - loss: 0.9859 - val_accuracy: 0.4120 - val_loss: 0.9801 - learning_rate: 0.0010 Epoch 6/20 2/2 - 11s - 5s/step - accuracy: 0.4187 - loss: 0.9322 - val_accuracy: 0.3995 - val_loss: 0.9532 - learning_rate: 0.0010 Epoch 7/20 2/2 - 11s - 5s/step - accuracy: 0.4095 - loss: 0.9078 - val_accuracy: 0.4091 - val_loss: 0.9517 - learning_rate: 0.0010 Epoch 8/20 2/2 - 11s - 5s/step - accuracy: 0.4218 - loss: 0.9065 - val_accuracy: 0.5514 - val_loss: 0.9400 - learning_rate: 0.0010 Epoch 9/20 2/2 - 20s - 10s/step - accuracy: 0.5231 - loss: 0.8971 - val_accuracy: 0.9054 - val_loss: 0.9426 - learning_rate: 0.0010 Epoch 10/20 2/2 - 11s - 5s/step - accuracy: 0.9093 - loss: 0.8931 - val_accuracy: 0.7394 - val_loss: 0.9239 - learning_rate: 0.0010 Epoch 11/20 2/2 - 11s - 5s/step - accuracy: 0.6097 - loss: 0.8815 - val_accuracy: 0.9034 - val_loss: 0.9149 - learning_rate: 0.0010 Epoch 12/20 2/2 - 11s - 5s/step - accuracy: 0.8844 - loss: 0.8671 - val_accuracy: 0.8993 - val_loss: 0.9250 - learning_rate: 0.0010 Epoch 13/20 2/2 - 11s - 5s/step - accuracy: 0.8780 - loss: 0.8653 - val_accuracy: 0.3961 - val_loss: 0.9608 - learning_rate: 0.0010 Epoch 14/20 2/2 - 11s - 5s/step - accuracy: 0.5648 - loss: 0.9048 - val_accuracy: 0.8852 - val_loss: 0.9421 - learning_rate: 0.0010 Epoch 15/20 2/2 - 20s - 10s/step - accuracy: 0.8975 - loss: 0.8780 - val_accuracy: 0.9094 - val_loss: 0.8940 - learning_rate: 5.0000e-04 Epoch 16/20 2/2 - 11s - 5s/step - accuracy: 0.7763 - loss: 0.8410 - val_accuracy: 0.4037 - val_loss: 0.9021 - learning_rate: 5.0000e-04 Epoch 17/20 2/2 - 11s - 5s/step - accuracy: 0.4146 - loss: 0.8649 - val_accuracy: 0.4403 - val_loss: 0.8797 - learning_rate: 5.0000e-04 Epoch 18/20 2/2 - 11s - 5s/step - accuracy: 0.5628 - loss: 0.8359 - val_accuracy: 0.9111 - val_loss: 0.8896 - learning_rate: 5.0000e-04 Epoch 19/20 2/2 - 11s - 5s/step - accuracy: 0.9121 - loss: 0.8472 - val_accuracy: 0.9069 - val_loss: 0.8955 - learning_rate: 5.0000e-04 Epoch 20/20 2/2 - 11s - 5s/step - accuracy: 0.9102 - loss: 0.8381 - val_accuracy: 0.6012 - val_loss: 0.8633 - learning_rate: 5.0000e-04 WARNING:tensorflow:5 out of the last 13080 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x76fdc26ab060> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details. 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 738ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 650ms/step delta Spec Sens 0.00 0.819 0.784 0.05 0.987 0.318 0.10 0.998 0.109 0.15 1.000 0.029 Final: Acc:0.925, Sens:0.318, Spec:0.987 Confusion matrix: [[TN:234544, FP:2983] [FN:16796, TP:7821]] Macierz pomyłek dla każdego obrazu testowego ------------------------------------------------------------ Name | TN FP FN TP ------------------------------------------------------------ im0001 | 57359 1374 4532 2271 im0291 | 60908 226 3109 1293 im0240 | 56871 150 5963 2552 im0002 | 59406 1233 3192 1705 ------------------------------------------------------------
*U-NET* DROPOUT 0.2 -> 0.1 POS_WEIGHT 5
In [ ]:
import os
import gzip
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, roc_curve
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, Dropout
)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
pos_weight = 5
def weighted_bce(y_true, y_pred):
bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)
y2 = tf.squeeze(y_true, axis=-1)
weight = 1 + (pos_weight - 1) * y2
return tf.reduce_mean(bce * weight)
base_dir = "."
images_dir = os.path.join(base_dir, "star_images")
ah_masks_dir = os.path.join(base_dir, "labels_ah")
vk_masks_dir = os.path.join(base_dir, "lables_vk")
def create_field_of_view_mask(gray):
_, thresh = cv2.threshold(
gray, 0, 255,
cv2.THRESH_OTSU | cv2.THRESH_BINARY
)
ker = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, ker)
cnts, _ = cv2.findContours(
closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
if not cnts:
return (closed > 0).astype(np.uint8)
cnt = max(cnts, key=cv2.contourArea)
mask = np.zeros_like(gray, dtype=np.uint8)
cv2.drawContours(mask, [cnt], -1, 255, thickness=-1)
return (mask > 0).astype(np.uint8)
IMG_SIZE = 256
files = sorted(glob(os.path.join(images_dir, "*.ppm.gz")))
def postprocess_mask(mask_bin):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
opened = cv2.morphologyEx(mask_bin, cv2.MORPH_OPEN, kernel)
closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)
return closed
X, Y = [], []
X_color_orig = []
for fn in files:
name = os.path.basename(fn).replace(".ppm.gz", "")
img = load_ppm_gz(os.path.join(images_dir, f"{name}.ppm.gz"))
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gray = apply_preprocessing(rgb)
fov = create_field_of_view_mask(gray)
gray = gray * fov
orig_color = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
m_ah = load_ppm_gz(os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")) > 0
vk_p = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
m_vk = (load_ppm_gz(vk_p)>0) if os.path.exists(vk_p) else np.zeros_like(m_ah)
mask = ((m_ah.astype(int)+m_vk.astype(int))>=1).astype(np.uint8)
mask = mask * fov
gray_r = cv2.resize(gray, (IMG_SIZE,IMG_SIZE), interpolation=cv2.INTER_AREA)
mask_r = cv2.resize(mask, (IMG_SIZE,IMG_SIZE), interpolation=cv2.INTER_NEAREST)
gray_r = gray_r.astype(np.float32)/255.0
mask_r = mask_r.astype(np.float32)
X.append(np.expand_dims(gray_r, axis=-1))
Y.append(np.expand_dims(mask_r, axis=-1))
X_color_orig.append(orig_color)
X = np.stack(X, axis=0) # (N, H, W, 1)
Y = np.stack(Y, axis=0) # (N, H, W, 1)
X_tmp, X_test, Y_tmp, Y_test, Xc_tmp, Xc_test, files_tmp, files_test = train_test_split(
X, Y, X_color_orig, files, test_size=0.2, random_state=42
)
X_train, X_val, Y_train, Y_val, Xc_train, Xc_val, files_train, files_val = train_test_split(
X_tmp, Y_tmp, Xc_tmp, files_tmp, test_size=0.2, random_state=42
)
print("Train:", X_train.shape, Y_train.shape)
print("Val: ", X_val.shape, Y_val.shape)
print("Test: ", X_test.shape, Y_test.shape)
def unet(input_shape=(IMG_SIZE, IMG_SIZE, 1)):
inp = Input(input_shape)
c1 = Conv2D(64, 3, activation="relu", padding="same")(inp)
c1 = Conv2D(64, 3, activation="relu", padding="same")(c1)
p1 = MaxPooling2D()(c1)
c2 = Conv2D(128, 3, activation="relu", padding="same")(p1)
c2 = Conv2D(128, 3, activation="relu", padding="same")(c2)
p2 = MaxPooling2D()(c2)
c3 = Conv2D(256, 3, activation="relu", padding="same")(p2)
c3 = Conv2D(256, 3, activation="relu", padding="same")(c3)
c3 = Dropout(0.2)(c3)
u4 = Conv2DTranspose(128, 2, strides=2, padding="same")(c3)
m4 = concatenate([u4, c2])
c4 = Conv2D(128, 3, activation="relu", padding="same")(m4)
c4 = Conv2D(128, 3, activation="relu", padding="same")(c4)
c4 = Dropout(0.1)(c4)
u5 = Conv2DTranspose(64, 2, strides=2, padding="same")(c4)
m5 = concatenate([u5, c1])
c5 = Conv2D(64, 3, activation="relu", padding="same")(m5)
c5 = Conv2D(64, 3, activation="relu", padding="same")(c5)
out = Conv2D(1, 1, activation="sigmoid")(c5)
return Model(inp, out)
model = unet(input_shape=(IMG_SIZE,IMG_SIZE,1))
model.compile(
optimizer="adam",
loss=weighted_bce,
metrics=["accuracy"]
)
callbacks = [
EarlyStopping(patience=5, restore_best_weights=True),
ModelCheckpoint("best_unet2.keras", save_best_only=True),
ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3, min_lr=1e-6)
]
history = model.fit(
X_train, Y_train,
validation_data=(X_val, Y_val),
epochs=20,
batch_size=8,
callbacks=callbacks,
verbose=2
)
probs_val_flat = model.predict(X_val).flatten()
y_val_flat = Y_val.flatten()
fpr, tpr, th = roc_curve(y_val_flat, probs_val_flat)
best_idx = np.argmax(tpr - fpr)
best_thr = th[best_idx]
probs_test_flat = model.predict(X_test).flatten()
y_test_flat = Y_test.flatten()
print("delta\tSpec\tSens")
for delta in [0.00, 0.05, 0.10, 0.15]:
thr = best_thr + delta
y_pred = (probs_test_flat >= thr).astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_test_flat, y_pred).ravel()
spec = tn / (tn + fp) if (tn+fp)>0 else 0
sens = tp / (tp + fn) if (tp+fn)>0 else 0
print(f"{delta:.2f}\t{spec:.3f}\t{sens:.3f}")
delta = 0.05
thr = best_thr + delta
y_pred_final = (probs_test_flat >= thr).astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_test_flat, y_pred_final).ravel()
acc = accuracy_score(y_test_flat, y_pred_final)
sens = recall_score(y_test_flat, y_pred_final)
spec = tn / (tn + fp)
print(f"\nFinal: Acc:{acc:.3f}, Sens:{sens:.3f}, Spec:{spec:.3f}")
print(f"Confusion matrix:\n[[TN:{tn}, FP:{fp}]\n [FN:{fn}, TP:{tp}]]")
pred_masks256 = y_pred_final.reshape(-1, IMG_SIZE, IMG_SIZE)
print("\nMacierz pomyłek dla każdego obrazu testowego")
print("-" * 60)
print(f"{'Name':<20} | {'TN':>8} {'FP':>8} {'FN':>8} {'TP':>8}")
print("-" * 60)
for i in range(len(Xc_test)):
name = os.path.basename(files_test[i]).replace(".ppm.gz", "")
gt_mask_256 = Y_test[i].squeeze()
pred_mask_256 = pred_masks256[i]
y_true_flat = gt_mask_256.flatten().astype(np.uint8)
y_pred_flat = pred_mask_256.flatten().astype(np.uint8)
tn, fp, fn, tp = confusion_matrix(y_true_flat, y_pred_flat, labels=[0, 1]).ravel()
print(f"{name:<20} | {tn:>8} {fp:>8} {fn:>8} {tp:>8}")
print("-" * 60)
indices = [0, 1, 3]
fig, axes = plt.subplots(len(indices), 3, figsize=(18, 6*len(indices)))
if len(indices) == 1:
axes = np.array([axes])
for row, idx in enumerate(indices):
if idx >= len(Xc_test):
print(f"Indeks {idx} jest poza zakresem zbioru testowego.")
continue
orig_full = Xc_test[idx]
correct_filepath = files_test[idx]
name = os.path.basename(correct_filepath).replace(".ppm.gz","")
ah_path = os.path.join(ah_masks_dir, f"{name}.ah.ppm.gz")
vk_path = os.path.join(vk_masks_dir, f"{name}.vk.ppm.gz")
if not os.path.exists(ah_path): continue
ah_img = load_ppm_gz(ah_path)
if ah_img is None: continue
ah = ah_img > 0
vk_img = load_ppm_gz(vk_path)
vk = (vk_img > 0) if vk_img is not None else np.zeros_like(ah)
gt_full = ((ah.astype(int) + vk.astype(int)) >= 1).astype(np.uint8)
pm256 = pred_masks256[idx].astype(np.uint8)
pm_up = cv2.resize(pm256, (orig_full.shape[1], orig_full.shape[0]), interpolation=cv2.INTER_NEAREST)
y_t = gt_full.flatten()
y_p = pm_up.flatten()
tn, fp, fn, tp = confusion_matrix(y_t, y_p, labels=[0, 1]).ravel()
acc = (tn+tp)/(tn+tp+fn+fp) if (tn+tp+fn+fp) > 0 else 0
sens = tp/(tp+fn) if tp+fn > 0 else 0
spec = tn/(tn+fp) if tn+fp > 0 else 0
overlay = orig_full.copy()
overlay[(gt_full==1)&(pm_up==1)] = [0,255,0]
overlay[(gt_full==0)&(pm_up==1)] = [255, 105, 180]
overlay[(gt_full==1)&(pm_up==0)] = [0,0,255]
ax0, ax1, ax2 = axes[row]
ax0.imshow(orig_full)
ax0.set_title(f"Oryginał (name: {name})")
ax0.axis("off")
ax1.imshow(gt_full, cmap="gray")
ax1.set_title("GT mask")
ax1.axis("off")
ax2.imshow(overlay)
ax2.set_title(f"Predykcja\nAcc:{acc:.2f} Sens:{sens:.2f} Spec:{spec:.2f}")
green_patch = mpatches.Patch(color='green', label='TP')
pink_patch = mpatches.Patch(color='pink', label='FP')
blue_patch = mpatches.Patch(color='blue', label='FN')
ax2.legend(handles=[green_patch, yellow_patch, blue_patch], loc='lower right')
ax2.axis("off")
plt.tight_layout()
plt.show()
Train: (12, 256, 256, 1) (12, 256, 256, 1) Val: (4, 256, 256, 1) (4, 256, 256, 1) Test: (4, 256, 256, 1) (4, 256, 256, 1) Epoch 1/20
2025-06-21 04:46:40.745474: E external/local_xla/xla/stream_executor/cuda/cuda_platform.cc:51] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303)
2/2 - 13s - 7s/step - accuracy: 0.4163 - loss: 1.0154 - val_accuracy: 0.8733 - val_loss: 1.0429 - learning_rate: 0.0010 Epoch 2/20 2/2 - 11s - 5s/step - accuracy: 0.8834 - loss: 1.0076 - val_accuracy: 0.8733 - val_loss: 1.0316 - learning_rate: 0.0010 Epoch 3/20 2/2 - 12s - 6s/step - accuracy: 0.8834 - loss: 0.9960 - val_accuracy: 0.8733 - val_loss: 1.0045 - learning_rate: 0.0010 Epoch 4/20 2/2 - 11s - 5s/step - accuracy: 0.8834 - loss: 0.9592 - val_accuracy: 0.8737 - val_loss: 0.9067 - learning_rate: 0.0010 Epoch 5/20 2/2 - 11s - 5s/step - accuracy: 0.8839 - loss: 0.8572 - val_accuracy: 0.8743 - val_loss: 0.8651 - learning_rate: 0.0010 Epoch 6/20 2/2 - 11s - 6s/step - accuracy: 0.8866 - loss: 0.8265 - val_accuracy: 0.8733 - val_loss: 0.8646 - learning_rate: 0.0010 Epoch 7/20 2/2 - 11s - 5s/step - accuracy: 0.8834 - loss: 0.8277 - val_accuracy: 0.8810 - val_loss: 0.8572 - learning_rate: 0.0010 Epoch 8/20 2/2 - 10s - 5s/step - accuracy: 0.8764 - loss: 0.8165 - val_accuracy: 0.4789 - val_loss: 0.8596 - learning_rate: 0.0010 Epoch 9/20 2/2 - 10s - 5s/step - accuracy: 0.6360 - loss: 0.8184 - val_accuracy: 0.8788 - val_loss: 0.8481 - learning_rate: 0.0010 Epoch 10/20 2/2 - 20s - 10s/step - accuracy: 0.8905 - loss: 0.8060 - val_accuracy: 0.9104 - val_loss: 0.8299 - learning_rate: 0.0010 Epoch 11/20 2/2 - 11s - 5s/step - accuracy: 0.7895 - loss: 0.7950 - val_accuracy: 0.9039 - val_loss: 0.8231 - learning_rate: 0.0010 Epoch 12/20 2/2 - 11s - 5s/step - accuracy: 0.9047 - loss: 0.7846 - val_accuracy: 0.9172 - val_loss: 0.8057 - learning_rate: 0.0010 Epoch 13/20 2/2 - 20s - 10s/step - accuracy: 0.8098 - loss: 0.7661 - val_accuracy: 0.8984 - val_loss: 0.7888 - learning_rate: 0.0010 Epoch 14/20 2/2 - 11s - 5s/step - accuracy: 0.8980 - loss: 0.7645 - val_accuracy: 0.9186 - val_loss: 0.7740 - learning_rate: 0.0010 Epoch 15/20 2/2 - 10s - 5s/step - accuracy: 0.7562 - loss: 0.7592 - val_accuracy: 0.9179 - val_loss: 0.7669 - learning_rate: 0.0010 Epoch 16/20 2/2 - 11s - 5s/step - accuracy: 0.9123 - loss: 0.7523 - val_accuracy: 0.9125 - val_loss: 0.7696 - learning_rate: 0.0010 Epoch 17/20 2/2 - 12s - 6s/step - accuracy: 0.8007 - loss: 0.7382 - val_accuracy: 0.5525 - val_loss: 0.7635 - learning_rate: 0.0010 Epoch 18/20 2/2 - 11s - 5s/step - accuracy: 0.6622 - loss: 0.7311 - val_accuracy: 0.9185 - val_loss: 0.7492 - learning_rate: 0.0010 Epoch 19/20 2/2 - 10s - 5s/step - accuracy: 0.9201 - loss: 0.7111 - val_accuracy: 0.9195 - val_loss: 0.7313 - learning_rate: 0.0010 Epoch 20/20 2/2 - 10s - 5s/step - accuracy: 0.9089 - loss: 0.6927 - val_accuracy: 0.7841 - val_loss: 0.7169 - learning_rate: 0.0010 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 732ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 700ms/step delta Spec Sens 0.00 0.831 0.801 0.05 0.963 0.578 0.10 0.988 0.360 0.15 0.996 0.206 Final: Acc:0.927, Sens:0.578, Spec:0.963 Confusion matrix: [[TN:228658, FP:8869] [FN:10382, TP:14235]] Macierz pomyłek dla każdego obrazu testowego ------------------------------------------------------------ Name | TN FP FN TP ------------------------------------------------------------ im0001 | 55338 3395 2563 4240 im0291 | 59654 1480 2042 2360 im0240 | 56138 883 3878 4637 im0002 | 57528 3111 1899 2998 ------------------------------------------------------------